case PGT_l1_page_table:
free_l1_table(page);
if ( unlikely(current->mm.shadow_mode) &&
- (get_shadow_status(current, page-frame_table) & PSH_shadowed) )
+ (get_shadow_status(&current->mm,
+ page-frame_table) & PSH_shadowed) )
{
unshadow_table( page-frame_table, type );
- put_shadow_status(current);
+ put_shadow_status(&current->mm);
}
return;
case PGT_l2_page_table:
free_l2_table(page);
if ( unlikely(current->mm.shadow_mode) &&
- (get_shadow_status(current, page-frame_table) & PSH_shadowed) )
+ (get_shadow_status(&current->mm,
+ page-frame_table) & PSH_shadowed) )
{
unshadow_table( page-frame_table, type );
- put_shadow_status(current);
+ put_shadow_status(&current->mm);
}
return;
old_base_pfn = pagetable_val(current->mm.pagetable) >> PAGE_SHIFT;
current->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
- if( unlikely(current->mm.shadow_mode))
- {
- current->mm.shadow_table =
- shadow_mk_pagetable(current, pfn<<PAGE_SHIFT);
- write_cr3_counted(pagetable_val(current->mm.shadow_table));
- }
- else
- {
- write_cr3_counted(pfn << PAGE_SHIFT);
- }
+ shadow_mk_pagetable(&current->mm);
+
+ write_ptbase(&current->mm);
+
put_page_and_type(&frame_table[old_base_pfn]);
}
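
For reference, write_ptbase() is not shown in this excerpt; a minimal
sketch, assuming it simply folds together the shadow/native CR3 choice
that the removed lines above made inline:

    static inline void write_ptbase( struct mm_struct *m )
    {
        /* Load the shadow base if shadowing is on, else the guest base. */
        if ( unlikely(m->shadow_mode) )
            write_cr3_counted(pagetable_val(m->shadow_table));
        else
            write_cr3_counted(pagetable_val(m->pagetable));
    }
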
else
mk_l1_pgentry(req.val));
if ( okay && unlikely(current->mm.shadow_mode) &&
- (get_shadow_status(current, page-frame_table) &
+ (get_shadow_status(&current->mm, page-frame_table) &
PSH_shadowed) )
{
shadow_l1_normal_pt_update( req.ptr, req.val,
&prev_spfn, &prev_spl1e );
- put_shadow_status(current);
+ put_shadow_status(&current->mm);
}
put_page_type(page);
pfn);
if ( okay && unlikely(current->mm.shadow_mode) &&
- (get_shadow_status(current, page-frame_table) &
+ (get_shadow_status(&current->mm, page-frame_table) &
PSH_shadowed) )
{
shadow_l2_normal_pt_update( req.ptr, req.val );
- put_shadow_status(current);
+ put_shadow_status(&current->mm);
}
put_page_type(page);
if ( deferred_ops & DOP_FLUSH_TLB )
{
- if ( unlikely(current->mm.shadow_mode) )
- {
- check_pagetable( current,
- current->mm.pagetable, "pre-stlb-flush" );
- write_cr3_counted(pagetable_val(current->mm.shadow_table));
- }
- else
- write_cr3_counted(pagetable_val(current->mm.pagetable));
+ write_ptbase(&current->mm);
}
if ( deferred_ops & DOP_RELOAD_LDT )
if ( unlikely(deferred_ops & DOP_FLUSH_TLB) ||
unlikely(flags & UVMF_FLUSH_TLB) )
{
- if ( unlikely(p->mm.shadow_mode) )
- write_cr3_counted(pagetable_val(p->mm.shadow_table));
- else
- write_cr3_counted(pagetable_val(p->mm.pagetable));
+ write_ptbase(&p->mm);
}
else if ( unlikely(flags & UVMF_INVLPG) )
__flush_tlb_one(page_nr << PAGE_SHIFT);
********/
-int shadow_mode_control( struct task_struct *p, unsigned int op )
+static inline void free_shadow_page( struct mm_struct *m, unsigned int pfn )
{
- if (p->mm.shadow_mode && op == DOM0_SHADOW_CONTROL_OP_OFF )
+ unsigned long flags;
+
+ m->shadow_page_count--;
+
+ spin_lock_irqsave(&free_list_lock, flags);
+ list_add(&frame_table[pfn].list, &free_list);
+ free_pfns++;
+ spin_unlock_irqrestore(&free_list_lock, flags);
+}
+
+static void __free_shadow_table( struct mm_struct *m )
+{
+ int j;
+ struct shadow_status *a;
+
+ // This code assumes the page tables are not currently in use, i.e.
+ // the domain is stopped and CR3 points at something else!
+
+ // Walk the hash table and call free_shadow_page() on all entries.
+
+ for ( j = 0; j < shadow_ht_buckets; j++ )
{
- shadow_mode_disable(p);
+ a = &m->shadow_ht[j];
+ if (a->pfn)
+ {
+ free_shadow_page( m, a->spfn_and_flags & PSH_pfn_mask );
+ a->pfn = 0;
+ a->spfn_and_flags = 0;
+ }
+ a = a->next;
+ while ( a )
+ {
+ struct shadow_status *next = a->next;
+ free_shadow_page( m, a->spfn_and_flags & PSH_pfn_mask );
+ a->pfn = 0;
+ a->spfn_and_flags = 0;
+ a->next = m->shadow_ht_free;
+ m->shadow_ht_free = a;
+ a = next;
+ }
}
- else if (p->mm.shadow_mode && op == DOM0_SHADOW_CONTROL_OP_ENABLE_TEST )
- {
- shadow_mode_disable(p);
- shadow_mode_enable(p, SHM_test);
- }
- else if (p->mm.shadow_mode && op == DOM0_SHADOW_CONTROL_OP_FLUSH )
- {
- //shadow_mode_flush(p);
- }
- else
- {
- return -EINVAL;
- }
-
- return 0;
}
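
The struct shadow_status definition is outside this excerpt; from the
usage above, its assumed shape is roughly:

    struct shadow_status {
        unsigned long pfn;            /* guest pfn being shadowed (0 = empty) */
        unsigned long spfn_and_flags; /* shadow pfn | PSH_* flag bits         */
        struct shadow_status *next;   /* hash chain / free-list link          */
    };
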
-int shadow_mode_enable( struct task_struct *p, unsigned int mode )
+
+int shadow_mode_enable( struct mm_struct *m, unsigned int mode )
{
struct shadow_status **fptr;
int i;
- // sychronously stop domain
- // XXX for the moment, only use on already stopped domains!!!
- spin_lock_init(&p->mm.shadow_lock);
- spin_lock(&p->mm.shadow_lock);
+ spin_lock_init(&m->shadow_lock);
+ spin_lock(&m->shadow_lock);
- p->mm.shadow_mode = mode;
+ m->shadow_mode = mode;
// allocate hashtable
- p->mm.shadow_ht = kmalloc( shadow_ht_buckets *
+ m->shadow_ht = kmalloc( shadow_ht_buckets *
sizeof(struct shadow_status), GFP_KERNEL );
- if( ! p->mm.shadow_ht )
+ if( ! m->shadow_ht )
goto nomem;
- memset( p->mm.shadow_ht, 0, shadow_ht_buckets *
+ memset( m->shadow_ht, 0, shadow_ht_buckets *
sizeof(struct shadow_status) );
// allocate space for first lot of extra nodes
- p->mm.shadow_ht_extras = kmalloc( sizeof(void*) + (shadow_ht_extra_size *
+ m->shadow_ht_extras = kmalloc( sizeof(void*) + (shadow_ht_extra_size *
sizeof(struct shadow_status)), GFP_KERNEL );
- if( ! p->mm.shadow_ht_extras )
+ if( ! m->shadow_ht_extras )
goto nomem;
- memset( p->mm.shadow_ht_extras, 0, sizeof(void*) + (shadow_ht_extra_size *
+ memset( m->shadow_ht_extras, 0, sizeof(void*) + (shadow_ht_extra_size *
sizeof(struct shadow_status)) );
// add extras to free list
- fptr = &p->mm.shadow_ht_free;
+ fptr = &m->shadow_ht_free;
for ( i=0; i<shadow_ht_extra_size; i++ )
{
- *fptr = &p->mm.shadow_ht_extras[i];
- fptr = &(p->mm.shadow_ht_extras[i].next);
+ *fptr = &m->shadow_ht_extras[i];
+ fptr = &(m->shadow_ht_extras[i].next);
}
*fptr = NULL;
- *((struct shadow_status ** ) &p->mm.shadow_ht_extras[shadow_ht_extra_size]) = NULL;
+ *((struct shadow_status ** )
+ &m->shadow_ht_extras[shadow_ht_extra_size]) = NULL;
- spin_unlock(&p->mm.shadow_lock);
+ spin_unlock(&m->shadow_lock);
// call shadow_mk_pagetable
- p->mm.shadow_table = shadow_mk_pagetable( p,
- pagetable_val(p->mm.pagetable) );
+ shadow_mk_pagetable( m );
return 0;
nomem:
- spin_unlock(&p->mm.shadow_lock);
+ spin_unlock(&m->shadow_lock);
return -ENOMEM;
}
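
The sizeof(void*) added to both kmalloc sizes above buys one trailing
pointer per extras block; the final assignment initialises it, and
set_shadow_status() later uses it to chain successive blocks. Assumed
layout of one block:

    /*
     *  [ status[0] ... status[shadow_ht_extra_size-1] | next-block ptr ]
     *
     * The trailing pointer lets every extras block be reached (and
     * eventually freed) starting from m->shadow_ht_extras.
     */
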
-void shadow_mode_disable( )
+static void shadow_mode_disable( struct mm_struct *m )
{
// free the hash buckets as you go
// free the hashtable itself
}
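
shadow_mode_disable() is still a stub; a minimal sketch of what its two
comments imply, reusing the helpers above (freeing the chained extras
blocks is elided, and kfree() is assumed available alongside kmalloc()):

    static void shadow_mode_disable( struct mm_struct *m )
    {
        spin_lock(&m->shadow_lock);
        __free_shadow_table( m );   /* release all shadow pages          */
        spin_unlock(&m->shadow_lock);

        kfree( m->shadow_ht );      /* free the hash buckets themselves  */
        /* ... also walk and free the blocks off m->shadow_ht_extras ... */
        m->shadow_mode = 0;
    }
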
-
-static inline void free_shadow_page( struct task_struct *p, unsigned int pfn )
+static void shadow_mode_flush( struct mm_struct *m )
{
- unsigned long flags;
- p->mm.shadow_page_count--;
+ // since Dom0 did the hypercall, we should be running with its page
+ // tables right now. Calling flush on yourself would be really
+ // stupid.
- spin_lock_irqsave(&free_list_lock, flags);
- list_add(&frame_table[pfn].list, &free_list);
- free_pfns++;
- spin_unlock_irqrestore(&free_list_lock, flags);
-}
+ if ( m == &current->mm )
+ {
+ printk("Don't try and flush your own page tables!\n");
+ return;
+ }
+
+ spin_lock(&m->shadow_lock);
+ __free_shadow_table( m );
+ spin_unlock(&m->shadow_lock);
-static inline struct pfn_info *alloc_shadow_page( struct task_struct *p )
-{
- p->mm.shadow_page_count++;
+ // call shadow_mk_pagetable
+ shadow_mk_pagetable( m );
- return alloc_domain_page( NULL );
}
-static void __free_shadow_table( struct task_struct *p )
+int shadow_mode_control( struct task_struct *p, unsigned int op )
{
- int j;
- struct shadow_status *a;
-
- // the code assumes you're not using the page tables i.e.
- // the domain is stopped and cr3 is something else!!
+ int we_paused = 0;
+
+ // don't call if already shadowed...
- // walk the hash table and call free_shadow_page on all pages
+ // synchronously stop domain
+ if( !(p->state & TASK_STOPPED) && !(p->state & TASK_PAUSED))
+ {
+ sched_pause_sync(p);
+ printk("paused domain\n");
+ we_paused = 1;
+ }
- for(j=0;j<shadow_ht_buckets;j++)
+ if (p->mm.shadow_mode && op == DOM0_SHADOW_CONTROL_OP_OFF )
{
- a = &p->mm.shadow_ht[j];
- if (a->pfn)
- {
- free_shadow_page( p, a->spfn_and_flags & PSH_pfn_mask );
- a->pfn = 0;
- a->spfn_and_flags = 0;
- }
- a=a->next;
- while(a)
- {
- struct shadow_status *next = a->next;
- free_shadow_page( p, a->spfn_and_flags & PSH_pfn_mask );
- a->pfn = 0;
- a->spfn_and_flags = 0;
- a->next = p->mm.shadow_ht_free;
- p->mm.shadow_ht_free = a;
- a=next;
- }
+ shadow_mode_disable(&p->mm);
}
+ else if (p->mm.shadow_mode && op == DOM0_SHADOW_CONTROL_OP_ENABLE_TEST )
+ {
+ shadow_mode_disable(&p->mm);
+ shadow_mode_enable(&p->mm, SHM_test);
+ }
+ else if (p->mm.shadow_mode && op == DOM0_SHADOW_CONTROL_OP_FLUSH )
+ {
+ shadow_mode_flush(&p->mm);
+ }
+ else
+ {
+ return -EINVAL;
+ }
+
+ if ( we_paused ) wake_up(p);
+ return 0;
}
-static void flush_shadow_table( struct task_struct *p )
-{
-
- // XXX synchronously stop domain (needed for SMP guests)
- // switch to idle task's page tables
-
- // walk the hash table and call free_shadow_page on all pages
- spin_lock(&p->mm.shadow_lock);
- __free_shadow_table( p );
- spin_unlock(&p->mm.shadow_lock);
- // XXX unpause domain
+static inline struct pfn_info *alloc_shadow_page( struct mm_struct *m )
+{
+ m->shadow_page_count++;
+
+ return alloc_domain_page( NULL );
}
// even in the SMP guest case, there won't be a race here as
// this CPU was the one that cmpxchg'ed the page to invalid
- spfn = __shadow_status(current, gpfn) & PSH_pfn_mask;
- delete_shadow_status(current, gpfn);
+ spfn = __shadow_status(&current->mm, gpfn) & PSH_pfn_mask;
+ delete_shadow_status(&current->mm, gpfn);
#if 0 // XXX leave as might be useful for later debugging
{
else
perfc_decr(shadow_l2_pages);
- free_shadow_page( current, spfn );
+ free_shadow_page( &current->mm, spfn );
}
-static unsigned long shadow_l2_table(
- struct task_struct *p, unsigned long gpfn )
+unsigned long shadow_l2_table(
+ struct mm_struct *m, unsigned long gpfn )
{
struct pfn_info *spfn_info;
unsigned long spfn;
int i;
SH_VVLOG("shadow_l2_table( %08lx )",gpfn);
- spin_lock(&p->mm.shadow_lock);
perfc_incrc(shadow_l2_table_count);
perfc_incr(shadow_l2_pages);
// XXX in future, worry about racing in SMP guests
// -- use cmpxchg with PSH_pending flag to show progress (and spin)
- spfn_info = alloc_shadow_page(p);
+ spfn_info = alloc_shadow_page(m);
ASSERT( spfn_info ); // XXX deal with failure later e.g. blow cache
spfn = (unsigned long) (spfn_info - frame_table);
// mark pfn as being shadowed, update field to point at shadow
- set_shadow_status(p, gpfn, spfn | PSH_shadowed);
+ set_shadow_status(m, gpfn, spfn | PSH_shadowed);
// we need to do this before the linear map is set up
spl2e = (l2_pgentry_t *) map_domain_mem(spfn << PAGE_SHIFT);
SH_VLOG("shadow_l2_table( %08lx -> %08lx)",gpfn,spfn);
- spin_unlock(&p->mm.shadow_lock);
return spfn;
}
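
The lock/unlock pair removed here is not lost: shadow_l2_table() is now
entered with the lock already held by its only caller,
shadow_mk_pagetable() (the new inline in shadow.h, below), whose
discipline is:

    spin_lock(&mm->shadow_lock);
    if ( unlikely((spfn = __shadow_status(mm, gpfn)) == 0) )
        spfn = shadow_l2_table(mm, gpfn);  /* lock held across build */
    mm->shadow_table = mk_pagetable(spfn << PAGE_SHIFT);
    spin_unlock(&mm->shadow_lock);
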
-pagetable_t shadow_mk_pagetable( struct task_struct *p,
- unsigned long gptbase)
-{
- unsigned long gpfn, spfn=0;
-
- SH_VVLOG("shadow_mk_pagetable( gptbase=%08lx, mode=%d )",
- gptbase, p->mm.shadow_mode );
-
- if ( likely(p->mm.shadow_mode) ) // should always be true if we're here
- {
- gpfn = gptbase >> PAGE_SHIFT;
-
- if ( unlikely((spfn=__shadow_status(p, gpfn)) == 0 ) )
- {
- spfn = shadow_l2_table(p, gpfn );
- }
- }
-
- SH_VVLOG("leaving shadow_mk_pagetable( gptbase=%08lx, mode=%d )",
- gptbase, p->mm.shadow_mode );
-
- return mk_pagetable(spfn<<PAGE_SHIFT);
-}
int shadow_fault( unsigned long va, long error_code )
{
gl1pfn = gpde>>PAGE_SHIFT;
- if ( ! (sl1pfn=__shadow_status(current, gl1pfn) ) )
+ if ( ! (sl1pfn=__shadow_status(&current->mm, gl1pfn) ) )
{
// this L1 is NOT already shadowed so we need to shadow it
struct pfn_info *sl1pfn_info;
perfc_incrc(shadow_l1_table_count);
perfc_incr(shadow_l1_pages);
- set_shadow_status(current, gl1pfn, PSH_shadowed | sl1pfn);
+ set_shadow_status(&current->mm, gl1pfn, PSH_shadowed | sl1pfn);
gpde = gpde | _PAGE_ACCESSED | _PAGE_DIRTY;
spde = (gpde & ~PAGE_MASK) | _PAGE_RW | (sl1pfn<<PAGE_SHIFT);
l1_pgentry_t * spl1e, * prev_spl1e = *prev_spl1e_ptr;
-SH_VVLOG("shadow_l1_normal_pt_update pa=%08lx, gpte=%08lx, prev_spfn=%08lx, prev_spl1e=%08lx\n",
+SH_VVLOG("shadow_l1_normal_pt_update pa=%08lx, gpte=%08lx, prev_spfn=%08lx, prev_spl1e=%p\n",
pa,gpte,prev_spfn, prev_spl1e);
// to get here, we know the l1 page *must* be shadowed
gpfn = pa >> PAGE_SHIFT;
- spfn = __shadow_status(current, gpfn) & PSH_pfn_mask;
+ spfn = __shadow_status(&current->mm, gpfn) & PSH_pfn_mask;
if ( spfn == prev_spfn )
{
// to get here, we know the l2 page has a shadow
gpfn = pa >> PAGE_SHIFT;
- spfn = __shadow_status(current, gpfn) & PSH_pfn_mask;
+ spfn = __shadow_status(&current->mm, gpfn) & PSH_pfn_mask;
spte = 0;
if( gpte & _PAGE_PRESENT )
- s_sh = __shadow_status(current, gpte >> PAGE_SHIFT);
+ s_sh = __shadow_status(&current->mm, gpte >> PAGE_SHIFT);
sp2le = (l2_pgentry_t *) map_domain_mem( spfn << PAGE_SHIFT );
// no real need for a cache here
#define FAIL(_f, _a...) \
{printk("XXX %s-FAIL (%d,%d)" _f " g=%08lx s=%08lx\n", sh_check_name, level, i, ## _a , gpte, spte ); BUG();}
-static int check_pte( struct task_struct *p,
+static int check_pte( struct mm_struct *m,
unsigned long gpte, unsigned long spte, int level, int i )
{
unsigned long mask, gpfn, spfn;
}
-static int check_l1_table( struct task_struct *p, unsigned long va,
+static int check_l1_table( struct mm_struct *m, unsigned long va,
unsigned long g2, unsigned long s2 )
{
int j;
#define FAILPT(_f, _a...) \
{printk("XXX FAIL %s-PT" _f "\n", s, ## _a ); BUG();}
-int check_pagetable( struct task_struct *p, pagetable_t pt, char *s )
+int check_pagetable( struct mm_struct *m, pagetable_t pt, char *s )
{
unsigned long gptbase = pagetable_val(pt);
unsigned long gpfn, spfn;
/* -*- Mode:C; c-basic-offset:4; tab-width:4 -*- */
-#ifndef _XENO_SHADOW_H
-#define _XENO_SHADOW_H
+#ifndef _XEN_SHADOW_H
+#define _XEN_SHADOW_H
#include <xen/config.h>
#include <xen/types.h>
-#include <xen/mm.h>
#include <xen/perfc.h>
+#include <asm/processor.h>
+
/* Shadow PT flag bits in pfn_info */
#define PSH_shadowed (1<<31) /* page has a shadow. PFN points to shadow */
#define shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START+(SH_LINEAR_PT_VIRT_START>>(L2_PAGETABLE_SHIFT-L1_PAGETABLE_SHIFT))))
extern int shadow_mode_control( struct task_struct *p, unsigned int op );
-extern pagetable_t shadow_mk_pagetable( struct task_struct *p,
- unsigned long gptbase);
extern int shadow_fault( unsigned long va, long error_code );
extern void shadow_l1_normal_pt_update( unsigned long pa, unsigned long gpte,
unsigned long *prev_spfn_ptr,
l1_pgentry_t **prev_spl1e_ptr );
extern void shadow_l2_normal_pt_update( unsigned long pa, unsigned long gpte );
extern void unshadow_table( unsigned long gpfn, unsigned int type );
-extern int shadow_mode_enable( struct task_struct *p, unsigned int mode );
+extern int shadow_mode_enable( struct mm_struct *m, unsigned int mode );
+extern unsigned long shadow_l2_table(
+ struct mm_struct *m, unsigned long gpfn );
#define SHADOW_DEBUG 0
#define SHADOW_HASH_DEBUG 0
#if SHADOW_HASH_DEBUG
-static void shadow_audit(struct task_struct *p, int print)
+static void shadow_audit(struct mm_struct *m, int print)
{
int live=0, free=0, j=0, abs;
struct shadow_status *a;
#define shadow_audit(p, print)
#endif
-static inline struct shadow_status* hash_bucket( struct task_struct *p,
+
+
+static inline struct shadow_status* hash_bucket( struct mm_struct *m,
unsigned int gpfn )
{
- return &(p->mm.shadow_ht[gpfn % shadow_ht_buckets]);
+ return &(m->shadow_ht[gpfn % shadow_ht_buckets]);
}
-static inline unsigned long __shadow_status( struct task_struct *p,
+static inline unsigned long __shadow_status( struct mm_struct *m,
unsigned int gpfn )
{
- struct shadow_status **ob, *b, *B = hash_bucket( p, gpfn );
+ struct shadow_status **ob, *b, *B = hash_bucket( m, gpfn );
b = B;
ob = NULL;
- SH_VVLOG("lookup gpfn=%08lx bucket=%08lx", gpfn, b );
- shadow_audit(p,0); // if in debug mode
+ SH_VVLOG("lookup gpfn=%08x bucket=%p", gpfn, b );
+ shadow_audit(m,0); // if in debug mode
do
{
ever becomes a problem, but since we need a spin lock on the hash table
anyway its probably not worth being too clever. */
-static inline unsigned long get_shadow_status( struct task_struct *p,
+static inline unsigned long get_shadow_status( struct mm_struct *m,
unsigned int gpfn )
{
unsigned long res;
- spin_lock(&p->mm.shadow_lock);
- res = __shadow_status( p, gpfn );
- if (!res) spin_unlock(&p->mm.shadow_lock);
+ spin_lock(&m->shadow_lock);
+ res = __shadow_status( m, gpfn );
+ if (!res) spin_unlock(&m->shadow_lock);
return res;
}
-static inline void put_shadow_status( struct task_struct *p )
+static inline void put_shadow_status( struct mm_struct *m )
{
- spin_unlock(&p->mm.shadow_lock);
+ spin_unlock(&m->shadow_lock);
}
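
Note the asymmetric contract: a nonzero result from get_shadow_status()
is returned with shadow_lock still held, and the caller must drop it
with put_shadow_status(); a zero result means the lock was already
released. Every call site earlier in this patch follows the pattern:

    if ( get_shadow_status(&current->mm, page-frame_table) & PSH_shadowed )
    {
        /* ... update the shadow while shadow_lock is held ... */
        put_shadow_status(&current->mm);
    }
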
-static inline void delete_shadow_status( struct task_struct *p,
+static inline void delete_shadow_status( struct mm_struct *m,
unsigned int gpfn )
{
struct shadow_status *b, *B, **ob;
- B = b = hash_bucket( p, gpfn );
+ B = b = hash_bucket( m, gpfn );
SH_VVLOG("delete gpfn=%08x bucket=%p", gpfn, b );
- shadow_audit(p,0);
+ shadow_audit(m,0);
ASSERT(gpfn);
if( b->pfn == gpfn )
b->pfn = b->next->pfn;
b->next = b->next->next;
- D->next = p->mm.shadow_ht_free;
- p->mm.shadow_ht_free = D;
+ D->next = m->shadow_ht_free;
+ m->shadow_ht_free = D;
}
else
{
}
#if SHADOW_HASH_DEBUG
- if( __shadow_status(p,gpfn) ) BUG();
+ if( __shadow_status(m,gpfn) ) BUG();
#endif
return;
}
// b is in the list
*ob=b->next;
- b->next = p->mm.shadow_ht_free;
- p->mm.shadow_ht_free = b;
+ b->next = m->shadow_ht_free;
+ m->shadow_ht_free = b;
#if SHADOW_HASH_DEBUG
- if( __shadow_status(p,gpfn) ) BUG();
+ if( __shadow_status(m,gpfn) ) BUG();
#endif
return;
}
}
-static inline void set_shadow_status( struct task_struct *p,
+static inline void set_shadow_status( struct mm_struct *m,
unsigned int gpfn, unsigned long s )
{
struct shadow_status *b, *B, *extra, **fptr;
int i;
- B = b = hash_bucket( p, gpfn );
+ B = b = hash_bucket( m, gpfn );
ASSERT(gpfn);
ASSERT(s);
SH_VVLOG("set gpfn=%08x s=%08lx bucket=%p(%p)", gpfn, s, b, b->next );
- shadow_audit(p,0);
+ shadow_audit(m,0);
do
{
return;
}
- if( unlikely(p->mm.shadow_ht_free == NULL) )
+ if( unlikely(m->shadow_ht_free == NULL) )
{
SH_LOG("allocate more shadow hashtable blocks");
sizeof(struct shadow_status)) );
// add extras to free list
- fptr = &p->mm.shadow_ht_free;
+ fptr = &m->shadow_ht_free;
for ( i=0; i<shadow_ht_extra_size; i++ )
{
*fptr = &extra[i];
}
*fptr = NULL;
- *((struct shadow_status ** ) &p->mm.shadow_ht[shadow_ht_extra_size]) =
- p->mm.shadow_ht_extras;
- p->mm.shadow_ht_extras = extra;
+ *((struct shadow_status ** ) &extra[shadow_ht_extra_size]) =
+ m->shadow_ht_extras;
+ m->shadow_ht_extras = extra;
}
// should really put this in B to go right to front
- b = p->mm.shadow_ht_free;
- p->mm.shadow_ht_free = b->next;
+ b = m->shadow_ht_free;
+ m->shadow_ht_free = b->next;
b->spfn_and_flags = s;
b->pfn = gpfn;
b->next = B->next;
return;
}
+static inline void shadow_mk_pagetable( struct mm_struct *mm )
+{
+ unsigned long gpfn, spfn=0;
+
+ SH_VVLOG("shadow_mk_pagetable( gptbase=%08lx, mode=%d )",
+ pagetable_val(mm->pagetable), mm->shadow_mode );
+
+ if ( unlikely(mm->shadow_mode) )
+ {
+ gpfn = pagetable_val(mm->pagetable) >> PAGE_SHIFT;
+
+ spin_lock(&mm->shadow_lock);
+ if ( unlikely((spfn=__shadow_status(mm, gpfn)) == 0 ) )
+ {
+ spfn = shadow_l2_table(mm, gpfn );
+ }
+ mm->shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
+ spin_unlock(&mm->shadow_lock);
+ }
+
+ SH_VVLOG("leaving shadow_mk_pagetable( gptbase=%08lx, mode=%d ) sh=%08lx",
+ pagetable_val(mm->pagetable), mm->shadow_mode,
+ pagetable_val(mm->shadow_table) );
+
+}
+
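
With this inline, callers no longer branch on shadow_mode themselves;
the new-baseptr hunk earlier in this patch reduces to the unconditional
sequence:

    current->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
    shadow_mk_pagetable(&current->mm);  /* no-op unless shadow_mode set */
    write_ptbase(&current->mm);         /* loads shadow or native base  */
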
#if SHADOW_DEBUG
-extern int check_pagetable( struct task_struct *p, pagetable_t pt, char *s );
+extern int check_pagetable( struct mm_struct *m, pagetable_t pt, char *s );
#else
-#define check_pagetable( p, pt, s )
+#define check_pagetable( m, pt, s )
#endif
-#endif
+#endif /* _XEN_SHADOW_H */